author:    Keir Fraser <keir.fraser@citrix.com>  Thu, 27 Dec 2007 10:41:43 +0000 (10:41 +0000)
committer: Keir Fraser <keir.fraser@citrix.com>  Thu, 27 Dec 2007 10:41:43 +0000 (10:41 +0000)

hvm: Cannot use ring_3() macro on HVM guests. It does not work because
the CS field is not saved/restored and also because CS.RPL does not
always equal the DPL (e.g., when executing in real mode).

Instead we must interrogate SS.DPL, or CPL directly (SVM supports this).

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/instrlen.c
xen/arch/x86/hvm/platform.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/mm/shadow/private.h

index f0575afdabfe01bf5494f0aeabbe62cd4ea6fc68..bce02f0c05cd3f6729b4c8f1172650417e0325c0 100644 (file)
@@ -1272,15 +1272,18 @@ void hvm_task_switch(
 static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, 
                       int virt, int fetch)
 {
+    struct segment_register sreg;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
     int count, todo;
     uint32_t pfec = PFEC_page_present;
 
+    hvm_get_segment_register(current, x86_seg_ss, &sreg);
+
     if ( dir ) 
         pfec |= PFEC_write_access;
-    if ( ring_3(guest_cpu_user_regs()) )
+    if ( sreg.attr.fields.dpl == 3 )
         pfec |= PFEC_user_mode;
     if ( fetch ) 
         pfec |= PFEC_insn_fetch;
@@ -1514,6 +1517,7 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
 
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
+    struct segment_register sreg;
     int flush, mode = hvm_guest_x86_mode(current);
     uint32_t eax = regs->eax;
 
@@ -1524,7 +1528,8 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
 #endif
     case 4:
     case 2:
-        if ( unlikely(ring_3(regs)) )
+        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        if ( unlikely(sreg.attr.fields.dpl == 3) )
         {
     default:
             regs->eax = -EPERM;
index d258e744dc60ca058922893658148ec259384247..7e6353d2e9183ff23c494cc44169872e024eb1f7 100644 (file)
@@ -192,15 +192,15 @@ static uint8_t twobyte_table[256] = {
        return -1;                                                         \
    if ( inst_copy_from_guest(&_x, pc, 1) != 1 ) {                         \
        unsigned long err;                                                 \
-       struct segment_register cs;                                        \
+       struct segment_register ss;                                        \
        gdprintk(XENLOG_WARNING,                                           \
                 "Cannot read from address %lx (eip %lx, mode %d)\n",      \
                 pc, org_pc, address_bytes);                               \
        err = 0; /* Must be not-present: we don't enforce reserved bits */ \
        if ( hvm_nx_enabled(current) )                                     \
            err |= PFEC_insn_fetch;                                        \
-       hvm_get_segment_register(current, x86_seg_cs, &cs);                \
-       if ( cs.attr.fields.dpl != 0 )                                     \
+       hvm_get_segment_register(current, x86_seg_ss, &ss);                \
+       if ( ss.attr.fields.dpl == 3 )                                     \
            err |= PFEC_user_mode;                                         \
        hvm_inject_exception(TRAP_page_fault, err, pc);                    \
        return -1;                                                         \
index 66c30d74fd298b3365c3a4680225a49fa1c6d85f..c210fa5d7160aa8eab333839c7437779513838b0 100644 (file)
@@ -1074,6 +1074,7 @@ void handle_mmio(paddr_t gpa)
 
     case INSTR_MOVS:
     {
+        struct segment_register sreg;
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         unsigned long addr, gfn; 
@@ -1089,7 +1090,8 @@ void handle_mmio(paddr_t gpa)
             addr &= 0xFFFF;
         addr += hvm_get_segment_base(v, x86_seg_es);        
         pfec = PFEC_page_present | PFEC_write_access;
-        if ( ring_3(regs) )
+        hvm_get_segment_register(v, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
index ca935e1458d7a43d57e1868e94de9e2488cef2f5..ca70979edb8a75508cadb8135c0e3b2df714d1b3 100644 (file)
@@ -1440,7 +1440,7 @@ static void svm_io_instruction(struct vcpu *v)
         pfec = PFEC_page_present;
         if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
             pfec |= PFEC_write_access;
-        if ( ring_3(regs) )
+        if ( vmcb->cpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         if ( gfn == INVALID_GFN ) 
index ee608600a3d79b618b1403d4497ea160a5703d0a..d1a58991d6da57e33e4433b7e1b1d1078a2210f8 100644 (file)
@@ -1754,7 +1754,7 @@ static void vmx_do_str_pio(unsigned long exit_qualification,
     pfec = PFEC_page_present;
     if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
         pfec |= PFEC_write_access;
-    if ( ring_3(regs) )
+    if ( ((__vmread(GUEST_SS_AR_BYTES) >> 5) & 3) == 3 )
         pfec |= PFEC_user_mode;
     gfn = paging_gva_to_gfn(current, addr, &pfec);
     if ( gfn == INVALID_GFN )
index 32215175795ca6b58f6581135631d9db468f958c..191e87feb32d444ae95cc9dba7baf1f32b07ac55 100644 (file)
@@ -101,7 +101,7 @@ int _shadow_mode_refcounts(struct domain *d)
 /* x86 emulator support for the shadow code
  */
 
-static struct segment_register *hvm_get_seg_reg(
+struct segment_register *hvm_get_seg_reg(
     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
 {
     struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
@@ -141,6 +141,7 @@ hvm_read(enum x86_segment seg,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct segment_register *sreg;
     unsigned long addr;
     int rc, errcode;
 
@@ -163,7 +164,8 @@ hvm_read(enum x86_segment seg,
      * was mapped here.  This should never happen: we're here because
      * of a write fault at the end of the instruction we're emulating. */ 
     SHADOW_PRINTK("read failed to va %#lx\n", addr);
-    errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
     if ( access_type == hvm_access_insn_fetch )
         errcode |= PFEC_insn_fetch;
     hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
index fe74ec566f04ee44cfaf9f7a871160c6cb785dee..a7cef75b01cce716608c690e6c40b7dc1e91f3b2 100644 (file)
@@ -4018,16 +4018,18 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
 
 /* Check that the user is allowed to perform this write. 
  * Returns a mapped pointer to write to, or NULL for error. */
-static void * emulate_map_dest(struct vcpu *v,
-                               unsigned long vaddr,
-                               u32 bytes,
-                               struct sh_emulate_ctxt *sh_ctxt)
+static void *emulate_map_dest(struct vcpu *v,
+                              unsigned long vaddr,
+                              u32 bytes,
+                              struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct segment_register *sreg;
     unsigned long offset;
     void *map = NULL;
 
     /* We don't emulate user-mode writes to page tables */
-    if ( ring_3(sh_ctxt->ctxt.regs) ) 
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    if ( sreg->attr.fields.dpl == 3 )
         return NULL;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
index 541177d2debe243015ea98d0def3121b7e6cbfc7..72a41d9a74ff406dda05efc7aa937ce7ffac8a2b 100644 (file)
@@ -680,7 +680,8 @@ struct x86_emulate_ops *shadow_init_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
 void shadow_continue_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
-
+struct segment_register *hvm_get_seg_reg(
+    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
 /**************************************************************************/